x86: No need to flush TLBs on free_page_type() as we no longer trust
author Keir Fraser <keir.fraser@citrix.com>
Wed, 4 Feb 2009 14:28:13 +0000 (14:28 +0000)
committer Keir Fraser <keir.fraser@citrix.com>
Wed, 4 Feb 2009 14:28:13 +0000 (14:28 +0000)
the linear pagetable mapping (we use it but we double check it).

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/mm.c

index 6fe26a3468e2bdc2fb97d080d6fb884a86b6e26d..c867b4ca9a83d4132e82a8bc530dbcaf5a51baf3 100644 (file)
@@ -2023,30 +2023,17 @@ int free_page_type(struct page_info *page, unsigned long type,
     unsigned long gmfn;
     int rc;
 
-    if ( likely(owner != NULL) )
+    if ( likely(owner != NULL) && unlikely(paging_mode_enabled(owner)) )
     {
-        /*
-         * We have to flush before the next use of the linear mapping
-         * (e.g., update_va_mapping()) or we could end up modifying a page
-         * that is no longer a page table (and hence screw up ref counts).
-         */
-        if ( current->domain == owner )
-            queue_deferred_ops(owner, DOP_FLUSH_ALL_TLBS);
-        else
-            flush_tlb_mask(owner->domain_dirty_cpumask);
-
-        if ( unlikely(paging_mode_enabled(owner)) )
-        {
-            /* A page table is dirtied when its type count becomes zero. */
-            paging_mark_dirty(owner, page_to_mfn(page));
+        /* A page table is dirtied when its type count becomes zero. */
+        paging_mark_dirty(owner, page_to_mfn(page));
 
-            if ( shadow_mode_refcounts(owner) )
-                return 0;
+        if ( shadow_mode_refcounts(owner) )
+            return 0;
 
-            gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
-            ASSERT(VALID_M2P(gmfn));
-            shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
-        }
+        gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
+        ASSERT(VALID_M2P(gmfn));
+        shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
     }
 
     if ( !(type & PGT_partial) )